bitkeeper revision 1.1236.1.153 (42496bbcmDKIhBdeW5tCa1moqo8b-Q)
author kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 29 Mar 2005 14:52:44 +0000 (14:52 +0000)
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 29 Mar 2005 14:52:44 +0000 (14:52 +0000)
Lazy pagetable and LDT switching was unsafe -- do them synchronously
for now, and maybe think about batching across switch_mm and switch_to
in the future (particularly for 2.6, which we care about more than 2.4).
Signed-off-by: Keir Fraser <keir@xensource.com>
linux-2.4.29-xen-sparse/arch/xen/kernel/ldt.c
linux-2.4.29-xen-sparse/arch/xen/kernel/process.c
linux-2.4.29-xen-sparse/arch/xen/mm/fault.c
linux-2.4.29-xen-sparse/include/asm-xen/mmu_context.h

index 79ac73960d46a2a58bfe33efd2b51b77bdb101d9..6235778493b23ae5041a4a12f2d94d6416ee596f 100644 (file)
@@ -117,8 +117,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 void destroy_context(struct mm_struct *mm)
 {
        if (mm->context.size) {
-               if (mm_state_sync & STATE_SYNC_LDT)
-                       clear_LDT();
                make_pages_writable(
                        mm->context.ldt, 
                        (mm->context.size*LDT_ENTRY_SIZE)/PAGE_SIZE);
index 478184f504985d592cdbf70e38c0ddaad842af18..c9d553627f584a7cdb21d17ce8f12151f81bc833 100644 (file)
@@ -305,35 +305,6 @@ void fastcall __switch_to(struct task_struct *prev_p, struct task_struct *next_p
     struct thread_struct *next = &next_p->thread;
     physdev_op_t op;
     multicall_entry_t _mcl[8], *mcl = _mcl;
-    mmu_update_t _mmu[2], *mmu = _mmu;
-
-    if ( mm_state_sync & STATE_SYNC_PT )
-    {
-        mmu->ptr = virt_to_machine(cur_pgd) | MMU_EXTENDED_COMMAND;
-        mmu->val = MMUEXT_NEW_BASEPTR;
-        mmu++;
-    }
-
-    if ( mm_state_sync & STATE_SYNC_LDT )
-    {
-        __asm__ __volatile__ ( 
-            "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : : "eax" );
-        mmu->ptr = (unsigned long)next_p->mm->context.ldt |
-            MMU_EXTENDED_COMMAND;
-        mmu->val = (next_p->mm->context.size << MMUEXT_CMD_SHIFT) |
-            MMUEXT_SET_LDT;
-        mmu++;
-    }
-
-    if ( mm_state_sync != 0 )
-    {
-        mcl->op      = __HYPERVISOR_mmu_update;
-        mcl->args[0] = (unsigned long)_mmu;
-        mcl->args[1] = mmu - _mmu;
-        mcl->args[2] = 0;
-        mcl++;
-        mm_state_sync = 0;
-    }
 
     /*
      * This is basically 'unlazy_fpu', except that we queue a multicall to 
index 1fd1b4c1494066266150f77dea8ae9a3c8ec4cf3..7db6463e0924444a0563fde38a8844d47003f106 100644 (file)
@@ -28,7 +28,6 @@
 extern void die(const char *,struct pt_regs *,long);
 
 pgd_t *cur_pgd;
-int mm_state_sync;
 
 extern spinlock_t timerlist_lock;
 
index 60c245e408c5f5fac0649fcd3673edd88b6a24ac..2aea8e2f1a0c2710e9984374b5b8b9aa75452467 100644 (file)
@@ -28,9 +28,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk,
 #endif
 
 extern pgd_t *cur_pgd;
-extern int mm_state_sync;
-#define STATE_SYNC_PT  1
-#define STATE_SYNC_LDT 2
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
 {
@@ -39,23 +36,16 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
                clear_bit(cpu, &prev->cpu_vm_mask);
                /* Re-load page tables */
                cur_pgd = next->pgd;
-               mm_state_sync |= STATE_SYNC_PT;
+               xen_pt_switch(__pa(cur_pgd));
                /* load_LDT, if either the previous or next thread
                 * has a non-default LDT.
                 */
                if (next->context.size+prev->context.size)
-                       mm_state_sync |= STATE_SYNC_LDT;
+                       load_LDT(&next->context);
        }
 }
 
-#define activate_mm(prev, next)                                 \
-do {                                                            \
-       switch_mm((prev),(next),NULL,smp_processor_id());       \
-       if (mm_state_sync & STATE_SYNC_PT)                      \
-               xen_pt_switch(__pa(cur_pgd));                   \
-       if (mm_state_sync & STATE_SYNC_LDT)                     \
-               load_LDT(&(next)->context);                     \
-       mm_state_sync = 0;                                      \
-} while ( 0 )
+#define activate_mm(prev, next)        \
+       switch_mm((prev),(next),NULL,smp_processor_id())
 
 #endif